[IA64] Remove VHPT_ADDR
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sat, 14 Oct 2006 23:52:09 +0000 (17:52 -0600)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sat, 14 Oct 2006 23:52:09 +0000 (17:52 -0600)
Remove VHPT_ADDR by mapping the VHPT through the Xen identity-mapping area,
plus some clean-ups.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/linux-xen/entry.S
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/regionreg.c
xen/arch/ia64/xen/vhpt.c
xen/arch/ia64/xen/xenasm.S
xen/include/asm-ia64/vhpt.h
xen/include/asm-ia64/xensystem.h

index d80be440a349c97674fc86a404ddf0cb64331685..2c73fc5d2ba8192153f355d1765d1f0d7821ab21 100644 (file)
@@ -262,13 +262,15 @@ GLOBAL_ENTRY(ia64_switch_to)
 #endif
        rsm psr.ic                      // interrupts (psr.i) are already disabled here
        movl r25=PAGE_KERNEL
+       movl r26 = IA64_GRANULE_SHIFT << 2
        ;;
        srlz.d
        or r23=r25,r20                  // construct PA | page properties
-       mov r25=IA64_GRANULE_SHIFT<<2
+       ptr.d in0, r26                  // to purge dtr[IA64_TR_VHPT]
        ;;
-       mov cr.itir=r25
+       mov cr.itir=r26
        mov cr.ifa=in0                  // VA of next task...
+       srlz.d
        ;;
        mov r25=IA64_TR_CURRENT_STACK
 #ifdef XEN
index 38940c2d5643840fe921a690a3d2023113da9893..92030adf5c5dd5856f9f05e2cdb9cbb4ed2b3cbf 100644 (file)
@@ -118,13 +118,13 @@ void schedule_tail(struct vcpu *prev)
        extern char ia64_ivt;
        context_saved(prev);
 
+       ia64_disable_vhpt_walker();
        if (VMX_DOMAIN(current)) {
                vmx_do_launch(current);
                migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
                              current->processor);
        } else {
                ia64_set_iva(&ia64_ivt);
-               ia64_disable_vhpt_walker();
                load_region_regs(current);
                ia64_set_pta(vcpu_pta(current));
                vcpu_load_kernel_regs(current);
@@ -157,6 +157,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     }
     if (VMX_DOMAIN(next))
        vmx_load_state(next);
+
+    ia64_disable_vhpt_walker();
     /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     prev = ia64_switch_to(next);
 
@@ -176,7 +178,6 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
        nd = current->domain;
        if (!is_idle_domain(nd)) {
-               ia64_disable_vhpt_walker();
                load_region_regs(current);
                ia64_set_pta(vcpu_pta(current));
                vcpu_load_kernel_regs(current);
@@ -192,7 +193,6 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
                 * walker. Then all accesses happen within idle context will
                 * be handled by TR mapping and identity mapping.
                 */
-               ia64_disable_vhpt_walker();
                __ia64_per_cpu_var(current_psr_i_addr) = NULL;
                __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
         }
index 612aced10592921ad30d0ee4ff3d05946a369e0f..2491c3ae62fae68d725c4b48c1740275f9ab55d5 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/vcpu.h>
 
 /* Defined in xemasm.S  */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long p_vhpt);
+extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
 
 /* RID virtualization mechanism is really simple:  domains have less rid bits
    than the host and the host rid space is shared among the domains.  (Values
@@ -260,7 +260,7 @@ int set_one_rr(unsigned long rr, unsigned long val)
        } else if (rreg == 7) {
                ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
                             v->arch.privregs, v->domain->arch.shared_info_va,
-                            vcpu_vhpt_maddr(v));
+                            __va_ul(vcpu_vhpt_maddr(v)));
        } else {
                set_rr(rr,newrrv.rrval);
        }
index 5cd68f4a35ca5fddd18239f2538203f9d5bc4ebf..8b0d2090afb7ff498eb4639eb178a929f89f6db5 100644 (file)
@@ -30,7 +30,7 @@ DEFINE_PER_CPU (unsigned long, vhpt_paddr);
 DEFINE_PER_CPU (unsigned long, vhpt_pend);
 
 static void
- __vhpt_flush(unsigned long vhpt_maddr)
+__vhpt_flush(unsigned long vhpt_maddr)
 {
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
        int i;
@@ -158,8 +158,7 @@ pervcpu_vhpt_alloc(struct vcpu *v)
        v->arch.pta.ve = 1; // enable vhpt
        v->arch.pta.size = VHPT_SIZE_LOG2;
        v->arch.pta.vf = 1; // long format
-       //v->arch.pta.base = __va(v->arch.vhpt_maddr) >> 15;
-       v->arch.pta.base = VHPT_ADDR >> 15;
+       v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;
 
        vhpt_erase(v->arch.vhpt_maddr);
        smp_mb(); // per vcpu vhpt may be used by another physical cpu.
@@ -284,7 +283,8 @@ __flush_vhpt_range(unsigned long vhpt_maddr, u64 vadr, u64 addr_range)
 
        while ((long)addr_range > 0) {
                /* Get the VHPT entry.  */
-               unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
+               unsigned int off = ia64_thash(vadr) -
+                       __va_ul(vcpu_vhpt_maddr(current));
                struct vhpt_lf_entry *v = vhpt_base + off;
                v->ti_tag = INVALID_TI_TAG;
                addr_range -= PAGE_SIZE;
@@ -444,7 +444,7 @@ static void flush_tlb_vhpt_all (struct domain *d)
 void domain_flush_tlb_vhpt(struct domain *d)
 {
        /* Very heavy...  */
-       if (HAS_PERVCPU_VHPT(d) /* || VMX_DOMAIN(v) */)
+       if (HAS_PERVCPU_VHPT(d) || d->arch.is_vti)
                on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
        else
                on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
index 0f0cff9620ca88d6412a778051f7ac68d9883f76..af5f31c5b2d124e94dd124916297ff981843a34d 100644 (file)
 //                         void *shared_info,           /* in1 */
 //                         void *shared_arch_info,      /* in2 */
 //                         unsigned long shared_info_va, /* in3 */
-//                         unsigned long p_vhpt)        /* in4 */
+//                         unsigned long va_vhpt)       /* in4 */
 //Local usage:
 //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
 //  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+//  r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
 GLOBAL_ENTRY(ia64_new_rr7)
        // FIXME? not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
@@ -118,16 +119,31 @@ GLOBAL_ENTRY(ia64_new_rr7)
 
        // VHPT
 #if VHPT_ENABLED
-       mov r24=VHPT_SIZE_LOG2<<2
-       movl r22=VHPT_ADDR
+#if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
+#error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
+#endif 
+       // unless overlaps with KERNEL_TR and IA64_TR_CURRENT_STACK
+       dep r14=0,in4,0,KERNEL_TR_PAGE_SHIFT
+       dep r15=0,in4,0,IA64_GRANULE_SHIFT
+       dep r21=0,r13,0,IA64_GRANULE_SHIFT
+       ;;
+       cmp.eq p7,p0=r17,r14
+       cmp.eq p8,p0=r15,r21
+(p7)   br.cond.sptk    .vhpt_overlaps
+(p8)   br.cond.sptk    .vhpt_overlaps
        mov r21=IA64_TR_VHPT
+       dep r22=0,r15,60,4              // physical address of
+                                       // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
+       mov r24=IA64_GRANULE_SHIFT<<2
        ;;
-       ptr.d   r22,r24
-       or r23=in4,r26                  // construct PA | page properties
+       ptr.d   r15,r24
+       or r23=r22,r26                  // construct PA | page properties
        mov cr.itir=r24
-       mov cr.ifa=r22
+       mov cr.ifa=r15
+       srlz.d
        ;;
        itr.d dtr[r21]=r23              // wire in new mapping...
+.vhpt_overlaps:        
 #endif
 
        //  Shared info
index d63a8d09b911611325318e4332d6278557b1c823..5b99762dacf8f363c76dce16f7c836ae7d603fbd 100644 (file)
@@ -79,7 +79,8 @@ vcpu_pta(struct vcpu* v)
     if (HAS_PERVCPU_VHPT(v->domain))
         return v->arch.pta.val;
 #endif
-    return VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
+    return __va_ul(__get_cpu_var(vhpt_paddr)) | (1 << 8) |
+        (VHPT_SIZE_LOG2 << 2) | VHPT_ENABLED;
 }
 
 #endif /* !__ASSEMBLY */
index d7c40e1735e9b3b3158309befc84767dd6cc0376..d966d1917b89d8cad967b3e34aa36edc1d2ecc4e 100644 (file)
@@ -22,7 +22,6 @@
 #define GATE_ADDR              KERNEL_START
 #define DEFAULT_SHAREDINFO_ADDR         0xf100000000000000
 #define PERCPU_ADDR             (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
-#define VHPT_ADDR               0xf200000000000000
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 #define VIRT_FRAME_TABLE_ADDR   0xf300000000000000
 #define VIRT_FRAME_TABLE_END    0xf400000000000000